{
// NOTE(review): this is a diff hunk; the function header (apparently a
// shadow-mode L1 page-table audit routine taking d, gmfn, smfn, l2_idx)
// lies outside this view, so the code is documented in place only.
int i;
unsigned long *gpl1e, *spl1e; // mapped guest / shadow L1 page tables
+ int cpu = current->processor;
+ int errors = 0; // mismatches accumulated from check_pte() below
+
+ // First check to see if this guest page is currently the active
+ // PTWR page. If so, then we compare the (old) cached copy of the
+ // guest page to the shadow, and not the currently writable (and
+ // thus potentially out-of-sync) guest page.
+ //
+ if ( VM_ASSIST(d, VMASST_TYPE_writable_pagetables) )
+ {
+ // NOTE(review): ARRAY_SIZE(ptwr_info->ptinfo) equals
+ // ARRAY_SIZE(ptwr_info[cpu].ptinfo) — sizeof is identical for every
+ // element — so the mixed indexing is harmless, if inconsistent.
+ for ( i = 0; i < ARRAY_SIZE(ptwr_info->ptinfo); i++)
+ {
+ if ( ptwr_info[cpu].ptinfo[i].l1va &&
+ ((v2m(ptwr_info[cpu].ptinfo[i].pl1e) >> PAGE_SHIFT) == gmfn) )
+ {
+ unsigned long old = gmfn;
+ // Redirect the audit at the pre-write snapshot page instead of
+ // the live (possibly out-of-sync) guest L1.
+ gmfn = (v2m(ptwr_info[cpu].ptinfo[i].page) >> PAGE_SHIFT);
+ // NOTE(review): 'old' and 'gmfn' are unsigned long but printed
+ // with %08x — OK on 32-bit x86, wrong on 64-bit; %08lx would be
+ // the portable choice. Confirm target word size before relying
+ // on this output.
+ printk("hit1 ptwr_info[%d].ptinfo[%d].l1va, mfn=0x%08x, snapshot=0x%08x\n",
+ cpu, i, old, gmfn);
+ }
+ }
+ }
- gpl1e = map_domain_mem(g2mfn << PAGE_SHIFT);
- spl1e = map_domain_mem(s2mfn << PAGE_SHIFT);
+ // Renamed g2mfn/s2mfn -> gmfn/smfn (gmfn may have been redirected to the
+ // PTWR snapshot above).
+ gpl1e = map_domain_mem(gmfn << PAGE_SHIFT);
+ spl1e = map_domain_mem(smfn << PAGE_SHIFT);
- for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ )
+ for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
- check_pte(d, &gpl1e[i], &spl1e[i], 1, i);
+ // check_pte() now also receives the L2 slot index (l2_idx) and returns
+ // an error count rather than being called for side effects only.
+ errors += check_pte(d, &gpl1e[i], &spl1e[i], 1, l2_idx, i);
unmap_domain_mem(spl1e);
unmap_domain_mem(gpl1e);
// Performance-counter declarations (macro-expanded elsewhere; definitions of
// PERFSTATUS / PERFCOUNTER_CPU / PERFCOUNTER_ARRAY are not in this view).
PERFSTATUS( shadow_l2_pages, "current # shadow L2 pages" )
PERFSTATUS( shadow_l1_pages, "current # shadow L1 pages" )
+ // Per-CPU call counters for the shadow-pagetable audit entry points.
+ PERFCOUNTER_CPU( check_pagetable, "calls to check_pagetable" )
+ PERFCOUNTER_CPU( check_all_pagetables, "calls to check_all_pagetables" )
++
+// NOTE(review): the bare '++' line above looks like diff-marker corruption
+// (probably a '+' adding a blank line) — confirm against the original patch.
+// Histogram sizes: 37 VM-exit reason slots and 0x20 (32) vector slots —
+// presumably the architectural exception vectors; verify the exit-reason
+// count against the VMX spec revision this targets.
+#define VMX_PERF_EXIT_REASON_SIZE 37
+#define VMX_PERF_VECTOR_SIZE 0x20
+PERFCOUNTER_ARRAY(vmexits, "vmexits", VMX_PERF_EXIT_REASON_SIZE )
+PERFCOUNTER_ARRAY(cause_vector, "cause vector", VMX_PERF_VECTOR_SIZE )